Ultrasound (US) examination is an essential tool for monitoring the fetus and mother throughout pregnancy, providing an economical and non-invasive way to observe the development of all fetal organs and maternal structures. Several measurements obtained from maternal-fetal scans are commonly used to monitor fetal growth.
All images were manually labeled by an expert maternal fetal clinician. Images were divided into 6 classes: four of the most widely used fetal anatomical planes (Abdomen, Brain, Femur and Thorax), the mother’s cervix (widely used for prematurity screening) and a general category to include any other less common image plane
# Download the Fetal Planes dataset archive from Zenodo (record 3904280).
!wget https://zenodo.org/record/3904280/files/FETAL_PLANES_ZENODO.zip?download=1
# !mv ./FETAL_PLANES_ZENODO.zip?download=1 ./dataset.zip
# Extract the Kaggle-mounted copy of the same archive into the working dir.
!unzip ../input/deep-learning-project/FETAL_PLANES_ZENODO.zip?download=1
# Weights & Biases client for experiment tracking.
!pip install wandb
import os
import sys
import json
import glob
import random
import collections
import time
import re
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import pydicom as dicom
import cv2
import os
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras_preprocessing.image.dataframe_iterator import DataFrameIterator
from keras_preprocessing.image import ImageDataGenerator
import matplotlib.pylab as plt
from tensorflow.keras.utils import Sequence
from keras.utils import data_utils
from keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
import tensorflow as tf
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D,GlobalAveragePooling2D
import pandas as pd
from glob import glob
from keras import backend as K
from tensorflow.keras.models import load_model
import wandb
from wandb.keras import WandbCallback
# --- Weights & Biases experiment-tracking setup ---
# SECURITY FIX: the original hard-coded a W&B API key in the notebook. A key
# committed to source control must be treated as leaked and revoked; read it
# from the environment instead (key=None falls back to interactive login).
import os

wandb.login(key=os.environ.get("WANDB_API_KEY"))
wandb.init(project="Resnet-Tracking", entity="oussamayousr")
# Shared callback that logs model weights to W&B during every fit() below.
wandb_callback = wandb.keras.WandbCallback(log_weights=True)
# List every image file extracted into ./Images.
list_images = os.listdir('./Images')
list_images[0]
print(type(list_images))
len(list_images)
print(type(list_images))
len(list_images)
# Load and display one sample ultrasound frame.  NOTE(review): cv2.imread
# returns BGR while matplotlib expects RGB, so colours appear channel-swapped.
image = cv2.imread("./Images/"+list_images[200])
plt.figure(figsize=(12,8))
plt.imshow(image)
# Preview 9 sample frames and stack them into a (9, 224, 224, 3) array.
# Collect resized frames in a list and stack once at the end instead of
# calling np.vstack inside the loop, which re-copies the whole array on
# every iteration (accidental O(n^2)).
frames = []
plt.figure(figsize=(15, 15))
for i in range(9):
    # cv2.imread yields a BGR uint8 array; resize to the 224x224 input size.
    img = cv2.resize(cv2.imread("./Images/" + list_images[i + 100]), (224, 224))
    frames.append(img)
    plt.subplot(3, 3, i + 1)
    # NOTE(review): the cmap is ignored for 3-channel images.
    plt.imshow(img, cmap='nipy_spectral_r')
    plt.axis('off')
# astype(float) matches the original's float64 result (vstack with a float
# seed array); train_data is not consumed later in the notebook.
train_data = np.stack(frames).astype(float)
# openpyxl is required by pandas to read .xlsx files.
!pip install openpyxl
import pandas as pd
# Metadata spreadsheet shipped with the dataset; the index column holds the
# image file names (without extension).
file_name = './FETAL_PLANES_DB_data.xlsx'
df = pd.read_excel(file_name, index_col=0)
df
# Move the file-name index into a regular column, then use a clean 0..n index.
df["Image_name"] = df.index
df
df.reset_index(drop=True, inplace=True)
df
# The spreadsheet header contains a trailing space ('Train ').
df.rename(columns={'Train ': 'Train'}, inplace=True)
df.Train[df.Train == 0]
# Turn bare file names into relative paths usable by flow_from_dataframe.
df.Image_name = "./Images/"+df.Image_name+".png"
df
df.Plane.unique()
# Label-to-integer mapping kept for reference; flow_from_dataframe encodes
# string labels itself, so it is not needed.
# df['Plane'] = df['Plane'].map({'Other': 0, 'Maternal cervix': 1,'Fetal abdomen': 2,'Fetal brain':3,
# 'Fetal femur' : 4,'Fetal thorax':5})
# Shuffle the rows once so any ordering from the spreadsheet is broken up.
df = df.sample(frac=1).reset_index(drop=True)
df
# Class-distribution plots (the plane classes are imbalanced).
df.Plane.value_counts().sort_values().plot(kind = 'barh')
df.Plane.value_counts().sort_values().plot(kind = 'bar')
df.Plane.size
df[df["Train"] == 0]
# Split into held-out test (Train == 0) and training (Train == 1) partitions.
# FIX: take explicit .copy()s — the original kept views of `df`, so the
# in-place reset_index below operated on slices (SettingWithCopyWarning and
# potential aliasing back into df).
df_test = df[df["Train"] == 0].copy()
df_train = df[df["Train"] == 1].copy()
df_train.reset_index(drop=True, inplace=True)
df_test.reset_index(drop=True, inplace=True)
# Plain numpy arrays of image paths / class labels for the custom generator.
image_train_array = df_train.Image_name.to_numpy()
label_train_array = df_train.Plane.to_numpy()
image_test_array = df_test.Image_name.to_numpy()
label_test_array = df_test.Plane.to_numpy()
# Sanity check: both expressions must agree on the training-set size.
len(image_train_array)
len(df[df.Train == 1].Image_name.to_numpy())
In this part of the notebook, we use two methods to generate data: the first uses the TensorFlow `Sequence` class, which lets users customize their own data generators, and the second generates data using `flow_from_dataframe`.
class ImageFrameGenerator(tf.keras.utils.Sequence):
    """Custom Keras data generator.

    Each index served by ``__getitem__`` corresponds to one "super-batch" of
    ``image_batch`` consecutive images read from disk, resized to ``dim`` and
    scaled to [0, 1], together with their labels.

    Parameters
    ----------
    train_ID : sequence of int
        Super-batch indices this generator may serve.
    batch_size : int
        Super-batch indices per ``__getitem__`` call (the notebook uses 1).
    image_list_path : np.ndarray
        Image identifiers: full paths ("./Images/x.png") or bare stems.
    label_list : np.ndarray
        Class label for each entry of ``image_list_path``.
    image_batch : int
        Number of images per super-batch.
    dataset : pd.DataFrame
        Kept for backward compatibility; not used internally.
    dim : (int, int)
        Target (width, height) passed to cv2.resize.
    shuffle : bool
        Reshuffle super-batch order at the end of every epoch.
    """

    def __init__(self, train_ID, batch_size=1, image_list_path=image_train_array,
                 label_list=label_train_array, image_batch=32, dataset=df_test,
                 dim=(256, 256), shuffle=True):
        self.batch_size = batch_size
        self.image_list_path = image_list_path
        self.label_list = label_list
        self.image_batch = image_batch
        self.train_ID = train_ID
        self.dataset = dataset
        self.shuffle = shuffle
        self.dim = dim
        self.on_epoch_end()

    def __len__(self):
        # Batches per epoch.  Identical to the original for the default
        # batch_size of 1, but now consistent for other batch sizes.
        return int(np.floor(len(self.train_ID) / self.batch_size))

    def __getitem__(self, index):
        """Return one batch of data with pixel values scaled to [0, 1]."""
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = list(indexes)
        data, labels = self.__data_generation(list_IDs_temp)
        return data / 255, labels

    def on_epoch_end(self):
        # Reshuffle super-batch order between epochs when requested.
        self.indexes = np.arange(len(self.train_ID))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def _resolve_path(self, name):
        # BUG FIX: the original unconditionally built f"./Images/{name}.png"
        # even though image_train_array already holds full
        # "./Images/<stem>.png" paths (set during dataframe prep), producing
        # "./Images/./Images/<stem>.png.png".  Accept either form.
        name = str(name)
        return name if name.endswith(".png") else f"./Images/{name}.png"

    def __data_generation(self, list_IDs_temp):
        'Generates data containing image_batch samples'  # X : (n, *dim, 3)
        w, h = self.dim
        images, labels = [], []
        first_index = list_IDs_temp[0] * self.image_batch
        # FIX: iterate self.image_batch times instead of a hard-coded 32, and
        # resize to self.dim instead of a hard-coded 256.
        for i in range(self.image_batch):
            idx = first_index + i
            image = cv2.imread(self._resolve_path(self.image_list_path[idx]))
            image = cv2.resize(image, (w, h))
            images.append(image)
            labels.append(self.label_list[idx])
        # Stack once (float64, matching the original vstack-with-float seed).
        data = np.stack(images).reshape((-1, h, w, 3)).astype(float)
        return data, np.array(labels)
# Sanity-check that a known file loads (BGR ndarray, or None if missing).
image = cv2.imread("./Images/Patient01063_Plane5_2_of_2.png")
# Parameters for the custom Sequence generators.  NOTE: shuffle=False here
# overrides the class default of True, so batches are served in file order.
train_params = {'dim': (256,256),
                'batch_size': 1,
                'shuffle': False,
                'dataset':df_train,
                'image_list_path': image_train_array,
                'label_list':label_train_array}
test_params = {'dim': (256,256),
               'batch_size': 1,
               'shuffle': False,
               'dataset':df_test,
               'image_list_path': image_test_array,
               'label_list':label_test_array}
# Each generator index serves a super-batch of 32 images; the -1 drops the
# final (possibly incomplete) super-batch.
training_generator = ImageFrameGenerator(np.arange(df_train.Train.size//32-1),**train_params)
testing_generator = ImageFrameGenerator(np.arange(df_test.Train.size//32 -1),**test_params)
# Separate Keras ImageDataGenerator (defined here, used via the helper below).
datagen=ImageDataGenerator(rescale=1./255.,validation_split=0.25)
# Smoke test: fetch the first batch and inspect its shape.
x , y = training_generator.__getitem__(0)
x.shape
# Global image-size / channel / class-count constants used by all models.
DIM = 256
NB_CHANNELS = 3
NB_CLASSES = 6
df_test
def get_train_generator(df_train, df_test, image_dir=None, x_col="Image_name", y_cols="Plane",
                        shuffle=True, batch_size=64, seed=1, target_w=256, target_h=256):
    """Build Keras generators for the training and held-out dataframes.

    Parameters
    ----------
    df_train, df_test : pd.DataFrame
        Frames whose `x_col` column holds image paths and `y_cols` the label.
    image_dir : str or None
        Passed to flow_from_dataframe's `directory`; None means `x_col`
        already contains usable paths.
    x_col, y_cols : str
        Column names (FIX: the original ignored these parameters and
        hard-coded the column names).
    shuffle, batch_size, seed, target_w, target_h
        Standard flow_from_dataframe options.

    Returns
    -------
    (train_generator, valid_generator)
    """
    # Training pipeline: light geometric augmentation + per-sample
    # standardisation.  NOTE(review): rescale=1/255 combined with
    # samplewise_std_normalization is redundant (standardisation removes the
    # scale), kept for continuity with the trained models.
    train_image_generator = ImageDataGenerator(
        rescale=1 / 255, rotation_range=5, shear_range=0.02, zoom_range=0.02,
        samplewise_center=True, samplewise_std_normalization=True)
    # Evaluation pipeline: identical normalisation but NO random augmentation.
    # BUG FIX: the original augmented the evaluation data too, biasing every
    # reported validation metric.
    valid_image_generator = ImageDataGenerator(
        rescale=1 / 255,
        samplewise_center=True, samplewise_std_normalization=True)
    train_generator = train_image_generator.flow_from_dataframe(
        dataframe=df_train,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        batch_size=batch_size,
        seed=seed,
        shuffle=shuffle,
        class_mode="categorical",
        target_size=(target_w, target_h),
    )
    # FIX: dropped subset="training" — no validation_split is configured on
    # these generators, so the subset argument was meaningless at best and is
    # rejected by newer Keras versions.
    valid_generator = valid_image_generator.flow_from_dataframe(
        dataframe=df_test,
        directory=image_dir,
        x_col=x_col,
        y_col=y_cols,
        batch_size=batch_size,
        seed=seed,
        shuffle=shuffle,
        class_mode="categorical",
        target_size=(target_w, target_h),
    )
    return train_generator, valid_generator
# Build the flow_from_dataframe generators used by the baseline CNN below.
train_generator, valid_generator = get_train_generator(df_train,df_test, image_dir = None, x_col = "Image_name", y_cols = "Plane",
shuffle=True, batch_size=64, seed=1, target_w = 256, target_h = 256)
The first model we built is a baseline model. Since the dataset is somewhat unbalanced, we evaluated our models using multiple metrics: F1, precision, recall, etc.
# Baseline CNN: three conv stages (32 -> 64x2 -> 128x2 filters), each with
# ReLU + batch norm, followed by a 1024-unit dense head and 6-way softmax.
base_model = Sequential()
base_model.add(Conv2D(32, (3, 3), padding="same", input_shape=(DIM, DIM, NB_CHANNELS)))
base_model.add(Activation("relu"))
# BUG FIX: BatchNormalization(axis=1) normalised over the *height* axis; with
# channels_last data the feature axis is the last one (the default, axis=-1).
base_model.add(BatchNormalization())
base_model.add(MaxPooling2D(pool_size=(3, 3)))
base_model.add(Conv2D(64, (3, 3), padding="same"))
base_model.add(Activation("relu"))
base_model.add(BatchNormalization())
base_model.add(Conv2D(64, (3, 3), padding="same"))
base_model.add(Activation("relu"))
base_model.add(BatchNormalization())
base_model.add(MaxPooling2D(pool_size=(2, 2)))
base_model.add(Conv2D(128, (3, 3), padding="same"))
base_model.add(Activation("relu"))
base_model.add(BatchNormalization())
base_model.add(Conv2D(128, (3, 3), padding="same"))
base_model.add(Activation("relu"))
base_model.add(BatchNormalization())
base_model.add(MaxPooling2D(pool_size=(2, 2)))
base_model.add(Flatten())
base_model.add(Dense(1024))
base_model.add(Activation("relu"))
base_model.add(BatchNormalization())
# NB_CLASSES == 6 output planes.
base_model.add(Dense(NB_CLASSES))
base_model.add(Activation("softmax"))
# FIX: symbolic batch dimension instead of the original literal 0.
base_model.build((None, 256, 256, 3))
base_model.summary()
tf.keras.utils.plot_model(
    base_model, to_file='base_model.png'
)
from keras import backend as K
def recall_m(y_true, y_pred):
    """Batch-level recall: true positives over actual positives (epsilon-guarded)."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def precision_m(y_true, y_pred):
    """Batch-level precision: true positives over predicted positives (epsilon-guarded)."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())
def f1_m(y_true, y_pred):
    """Batch-level F1: harmonic mean of precision_m and recall_m."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * (p * r) / (p + r + K.epsilon())
Model training: we use the Adam optimizer — a solid choice for classification tasks — together with learning-rate decay to avoid stagnation of model performance during training. Since the head layer uses a softmax activation and the labels are one-hot encoded, the loss is categorical cross-entropy, trained for 15 epochs.
# Multiply the LR by 0.1 after 2 epochs without improvement.
# CONSISTENCY FIX: monitor 'val_accuracy' like the ResNet50 and DenseNet runs
# later in this notebook; monitoring training 'accuracy' lets the schedule
# track overfitting noise rather than generalisation.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=2,
                                            verbose=1,
                                            factor=0.1,
                                            min_lr=0.000001)
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
# Track accuracy plus the custom F1 / precision / recall metrics.
base_model.compile(optimizer=opt, loss='categorical_crossentropy',
                   metrics=['accuracy', f1_m, precision_m, recall_m])
history = base_model.fit(train_generator, validation_data=valid_generator,
                         epochs=15, callbacks=[learning_rate_reduction, wandb_callback])
from matplotlib import pyplot
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
# plot accuracy during training
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
# Persist in TensorFlow SavedModel format, then reload as a smoke test.
path = './base_model'
base_model.save(path, save_format='tf')
model = tf.keras.models.load_model('./base_model', compile=False)
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.applications import EfficientNetB6
# Transfer-learning head on EfficientNetB6: ImageNet backbone, flattened
# features, then 512 -> 256 dense layers (with dropout) into 6-way softmax.
inputs = layers.Input(shape=(128, 128, 3))
x = EfficientNetB6(include_top=False, weights="imagenet")(inputs)
x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
outputs = layers.Dense(6, activation='softmax')(x)
EfficientNet = tf.keras.Model(inputs, outputs)
EfficientNet.summary()
tf.keras.utils.plot_model(
    EfficientNet, to_file='base_model.png'
)
# Rebuild generators at the 128x128 input size used for EfficientNetB6.
train_generator, valid_generator = get_train_generator(df_train,df_test, image_dir = None, x_col = "Image_name", y_cols = "Plane",
shuffle=True, batch_size=32, seed=1, target_w = 128, target_h = 128)
# LR x0.1 after 2 epochs of flat *training* accuracy.
learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy',
                                            patience = 2,
                                            verbose=1,
                                            factor=0.1,
                                            min_lr=0.000001)
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
EfficientNet.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m])
history = EfficientNet.fit(train_generator,validation_data = valid_generator, epochs = 15,callbacks=[learning_rate_reduction,wandb_callback])
from matplotlib import pyplot
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
# plot accuracy during training
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
# Save as SavedModel, then reload as a smoke test.
from tensorflow.keras.models import load_model
path = './efficientNet_model'
EfficientNet.save(path, save_format='tf')
model = tf.keras.models.load_model('./efficientNet_model', compile=False)
We customized the model by adding three dense layers on top. The model may need more than 50 epochs to reach impressive results; unfortunately we did not have enough resources, so 20 epochs had to suffice.
# ResNet50 backbone.  NOTE(review): weights=None means the network is trained
# from scratch (no ImageNet pretraining) — confirm this was intended, since
# the surrounding text describes transfer learning.
encoder_resnet50 = tf.keras.applications.ResNet50(include_top=False, weights=None, input_shape=(DIM,DIM ,NB_CHANNELS))
new_model = Sequential(name = 'encoder_resnet_50')
new_model.add(encoder_resnet50)
new_model.add(GlobalAveragePooling2D())
# Two linear (no-activation) dense layers before the 6-way softmax head.
new_model.add(Dense(512))
new_model.add(Dense(256))
new_model.add(Dense(6, activation='softmax'))
resnet50 = new_model
resnet50.summary()
tf.keras.utils.plot_model(
    resnet50, to_file='base_model.png'
)
# Fresh 256x256 generators for the ResNet50 run.
train_generator, valid_generator = get_train_generator(df_train,df_test, image_dir = None, x_col = "Image_name", y_cols = "Plane",
shuffle=True, batch_size=64, seed=1, target_w = 256, target_h = 256)
# LR schedule keyed to *validation* accuracy for this run.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience = 2,
                                            verbose=1,
                                            factor=0.1,
                                            min_lr=0.000001)
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
resnet50.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m])
history = resnet50.fit(train_generator,validation_data = valid_generator, epochs = 20,callbacks=[learning_rate_reduction,wandb_callback])
# Final evaluation on the held-out generator.
resnet50.evaluate(valid_generator)
from matplotlib import pyplot
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# plot accuracy during training
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
from tensorflow.keras import layers
# --- Vision Transformer (ViT) hyper-parameters ---
input_shape = (256,256,3)
# NOTE(review): learning_rate, weight_decay, batch_size and num_epochs are
# declared but run_experiment below hard-codes its own optimizer and epochs.
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 64
num_epochs = 100
image_size = 256 # We'll resize input images to this size
patch_size = 6 # Size of the patches to be extract from the input images
# (256 // 6) ** 2 = 42 * 42 = 1764 patches per image.
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 8
transformer_units = [
    projection_dim * 2,
    projection_dim,
] # Size of the transformer layers
transformer_layers = 4
mlp_head_units = [64, 32] # Size of the dense layers of the final classifier
def mlp(x, hidden_units, dropout_rate):
    """Apply a Dense(GELU) -> Dropout block for each width in hidden_units."""
    for width in hidden_units:
        dense_out = layers.Dense(width, activation=tf.nn.gelu)(x)
        x = layers.Dropout(dropout_rate)(dense_out)
    return x
class Patches(layers.Layer):
    """Keras layer that splits images into flattened square patches.

    Input:  (batch, H, W, C) images.
    Output: (batch, num_patches, patch_size * patch_size * C) patch vectors.
    """
    def __init__(self, patch_size):
        super(Patches, self).__init__()
        self.patch_size = patch_size  # side length of each square patch
    def call(self, images):
        batch_size = tf.shape(images)[0]
        # Non-overlapping patches: stride equals patch size, VALID padding.
        patches = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.patch_size, 1],
            strides=[1, self.patch_size, self.patch_size, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patch_dims = patches.shape[-1]
        # Collapse the spatial patch grid into a single sequence dimension.
        patches = tf.reshape(patches, [batch_size, -1, patch_dims])
        return patches
class PatchEncoder(layers.Layer):
    """Project patch vectors to projection_dim and add position embeddings.

    Output shape: (batch, num_patches, projection_dim).
    """
    def __init__(self, num_patches, projection_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = num_patches
        # Linear projection shared across all patch positions.
        self.projection = layers.Dense(units=projection_dim)
        # One learned embedding per patch position.
        self.position_embedding = layers.Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )
    def call(self, patch):
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        encoded = self.projection(patch) + self.position_embedding(positions)
        return encoded
def create_vit_classifier():
    """Assemble a small Vision Transformer for 6-way plane classification.

    Pipeline: patchify -> linear projection + positional embedding ->
    `transformer_layers` pre-norm Transformer blocks -> flatten -> MLP head
    -> 6-way softmax.  Reads the module-level ViT hyper-parameters.
    """
    inputs = layers.Input(shape=input_shape)
    # Split the image into flattened non-overlapping patches.
    patches = Patches(patch_size)(inputs)
    # Project patches and add learned position embeddings.
    encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
    # Stack of pre-norm Transformer encoder blocks.
    for _ in range(transformer_layers):
        # Layer normalization 1.
        x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
        # Multi-head self-attention.
        attention_output = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=projection_dim, dropout=0.1
        )(x1, x1)
        # Skip connection 1.
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer normalization 2 followed by the block MLP.
        x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
        x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
        # Skip connection 2.
        encoded_patches = layers.Add()([x3, x2])
    # Final norm, then flatten the patch sequence into one feature vector.
    representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
    representation = layers.Flatten()(representation)
    representation = layers.Dropout(0.5)(representation)
    # Classification MLP head.
    features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
    # Class probabilities (softmax over the 6 planes).
    logits = layers.Dense(6, activation='softmax')(features)
    # BUG FIX: the original called keras.Model, but bare `keras` is never
    # imported in this notebook (only `tensorflow as tf`), raising NameError.
    model = tf.keras.Model(inputs=inputs, outputs=logits)
    return model
def run_experiment(model):
    """Compile `model` and train it on the module-level generators.

    Uses the global train_generator / valid_generator and logs to W&B.
    NOTE(review): epochs=1 looks like a resource-constrained smoke run, and
    monitor='accuracy' tracks the training metric — confirm both are intended.
    """
    learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy',
                                                patience = 2,
                                                verbose=1,
                                                factor=0.1,
                                                min_lr=0.000001)
    opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(train_generator,validation_data = valid_generator, epochs = 1,callbacks = [learning_rate_reduction,wandb_callback])
    return history
# Build, inspect and train the ViT classifier.
vit_classifier = create_vit_classifier()
print(vit_classifier.summary())
tf.keras.utils.plot_model(
    vit_classifier, to_file='base_model.png'
)
history = run_experiment(vit_classifier)
Metrics charts
# DenseNet121 with ImageNet weights as a feature extractor, plus the same
# GAP -> Dense(512) -> Dense(256) -> softmax head used for ResNet50.
DenseNet = tf.keras.applications.DenseNet121(
    include_top=False,
    weights="imagenet",
    input_tensor=None,
    input_shape=(256,256 ,NB_CHANNELS),
    pooling=None,
)
new_model = Sequential(name = 'DenseNet')
new_model.add(DenseNet)
new_model.add(GlobalAveragePooling2D())
# Two linear (no-activation) dense layers before the 6-way softmax.
new_model.add(Dense(512))
new_model.add(Dense(256))
new_model.add(Dense(6, activation='softmax'))
# Rebind the name to the full model (shadows the backbone reference above).
DenseNet = new_model
DenseNet.summary()
tf.keras.utils.plot_model(
    DenseNet, to_file='base_model.png'
)
# Fresh 256x256 generators for the DenseNet run.
train_generator, valid_generator = get_train_generator(df_train,df_test, image_dir = None, x_col = "Image_name", y_cols = "Plane",
shuffle=True, batch_size=64, seed=1, target_w = 256, target_h = 256)
# NOTE(review): recall_m / precision_m / f1_m below re-define the identical
# metric functions already defined earlier in the notebook.
from keras import backend as K
def recall_m(y_true, y_pred):
    # Batch recall: TP / actual positives (epsilon avoids division by zero).
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def precision_m(y_true, y_pred):
    # Batch precision: TP / predicted positives.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def f1_m(y_true, y_pred):
    # Harmonic mean of batch precision and recall.
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
# LR x0.1 after 2 epochs of flat validation accuracy.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience = 2,
                                            verbose=1,
                                            factor=0.1,
                                            min_lr=0.000001)
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
DenseNet.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m])
history = DenseNet.fit(train_generator,validation_data = valid_generator, epochs = 5,callbacks=[learning_rate_reduction,wandb_callback])
from matplotlib import pyplot
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# plot accuracy during training
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
# VGG19 with ImageNet weights as the backbone, plus the same
# GAP -> Dense(512) -> Dense(256) -> softmax head used above.
# BUG FIX: the original also passed classifier_activation="softmax", which is
# only valid together with include_top=True (newer Keras raises ValueError
# for the combination); with include_top=False there is no classifier head.
VGG19 = tf.keras.applications.VGG19(
    include_top=False,
    weights="imagenet",
    input_tensor=None,
    input_shape=(256, 256, NB_CHANNELS),
    pooling=None,
)
# NOTE(review): this rebinds the name VGG19, shadowing the class imported
# from keras.applications at the top of the notebook.
new_model = Sequential(name='VGG19')
new_model.add(VGG19)
new_model.add(GlobalAveragePooling2D())
# Two linear (no-activation) dense layers before the 6-way softmax.
new_model.add(Dense(512))
new_model.add(Dense(256))
new_model.add(Dense(6, activation='softmax'))
VGG19 = new_model
VGG19.summary()
tf.keras.utils.plot_model(
    VGG19, to_file='base_model.png'
)
# Fresh 256x256 generators for the VGG19 run.
train_generator, valid_generator = get_train_generator(df_train,df_test, image_dir = None, x_col = "Image_name", y_cols = "Plane",
shuffle=True, batch_size=64, seed=1, target_w = 256, target_h = 256)
# NOTE(review): third redefinition of the same metric functions.
from keras import backend as K
def recall_m(y_true, y_pred):
    # Batch recall: TP / actual positives (epsilon avoids division by zero).
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def precision_m(y_true, y_pred):
    # Batch precision: TP / predicted positives.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def f1_m(y_true, y_pred):
    # Harmonic mean of batch precision and recall.
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
# LR x0.1 after 2 epochs of flat *training* accuracy.
learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy',
                                            patience = 2,
                                            verbose=1,
                                            factor=0.1,
                                            min_lr=0.000001)
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
VGG19.compile(optimizer = opt, loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m])
history = VGG19.fit(train_generator,validation_data = valid_generator, epochs = 5,callbacks = [learning_rate_reduction,wandb_callback])
# Final evaluation on the held-out generator.
VGG19.evaluate(valid_generator)
# plot loss during training
pyplot.subplot(211)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
# plot accuracy during training
pyplot.subplot(212)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.show()
Metrics charts
Metrics report